1 Effect of UPSTM-Based Decorrelation on Feature Discovery

1.0.1 Loading the libraries

## Analysis and reporting libraries.
library("FRESA.CAD")
library(readxl)
library(igraph)
library(umap)
library(tsne)
library(entropy)
library(stringr)   # str_detect()/str_remove_all() are used below; load explicitly

## Save the default graphics parameters so plots can restore them via par(op).
op <- par(no.readonly = TRUE)
## pander formatting defaults for all report tables.
pander::panderOptions('digits', 3)
pander::panderOptions('table.split.table', 400)
pander::panderOptions('keep.trailing.zeros',TRUE)

1.1 Material and Methods

1.2 Data: The COVID_19 Data-Set

The data to process is described in:

https://zenodo.org/record/4156647#.Y1bSF3bMKUk

IR Saliva Testing Dataset

10.5281/zenodo.4156647 https://doi.org/10.5281/zenodo.4156647

I added a column to the data identifying the repeated experiments.


SalivaIR <- as.data.frame(read_excel("~/GitHub/FCA/Data/SalivaThermal_Source_Data_2.xlsx"))

## Extract one technical replicate, index rows by sample ID, and drop the
## bookkeeping columns (RepID, ID, Ct) so only spectral features remain.
getRepSet <- function(repid)
{
  repset <- subset(SalivaIR, RepID == repid)
  rownames(repset) <- repset$ID
  repset$RepID <- NULL
  repset$ID <- NULL
  repset$Ct <- NULL
  repset
}

SalivaIR_set1 <- getRepSet(1)
SalivaIR_set2 <- getRepSet(2)
SalivaIR_set3 <- getRepSet(3)

## Element-wise average of the three replicates.
## NOTE(review): assumes the three subsets share identical row (sample) order — confirm.
SalivaIR_Avg <- (SalivaIR_set1 + SalivaIR_set2 + SalivaIR_set3)/3

## Prefix wavenumber column names with "V_" to get syntactically valid names.
colnames(SalivaIR_Avg) <- paste("V",colnames(SalivaIR_Avg),sep="_")

## Class label: 1 when the sample ID contains "P" (positive), 0 otherwise.
SalivaIR_Avg$class <- 1*(str_detect(rownames(SalivaIR_Avg),"P"))

## Class counts.
pander::pander(table(SalivaIR_Avg$class))
0 1
30 31

1.2.0.1 Standardize the names for the reporting

## Standard names used by the reporting template.
studyName <- "IRSaliva"
dataframe <- SalivaIR_Avg
outcome <- "class"

## Number of top-ranked features to report.
TopVariables <- 10

## Decorrelation threshold and heat-map label size.
thro <- 0.80
cexheat <- 0.15   # use `<-` for assignment (was `=`)

1.3 Generating the report

1.3.1 Libraries

Some libraries

## Modeling and plotting libraries for the report body
## (consistent unquoted form throughout).
library(psych)
library(whitening)
library(vioplot)
library(rpart)

1.3.2 Data specs

# Dataset dimensions (column count excludes the outcome).
pander::pander(c(rows=nrow(dataframe),col=ncol(dataframe)-1))
rows col
61 251
# Class balance of the outcome.
pander::pander(table(dataframe[,outcome]))
0 1
30 31

# Feature names, excluding the outcome column.
varlist <- colnames(dataframe)
varlist <- varlist[varlist != outcome]

# Very wide datasets (>1500 features) skip the expensive heat-map plots below.
largeSet <- length(varlist) > 1500 

1.3.3 Scaling the data

Scaling and removing near-zero-variance columns and highly collinear (r > 0.99999) columns


  ### Some global cleaning
  ## Keep only columns with non-trivial variance (sd above 1e-16).
  ## Renamed from `sdiszero`, which described the opposite of what it held.
  hasVariance <- apply(dataframe,2,sd) > 1.0e-16
  dataframe <- dataframe[,hasVariance]

  varlist <- colnames(dataframe)[colnames(dataframe) != outcome]
  ## Drop near-duplicate features (|r| > 0.99999); keep the outcome column.
  tokeep <- c(as.character(correlated_Remove(dataframe,varlist,thr=0.99999)),outcome)
  dataframe <- dataframe[,tokeep]

  varlist <- colnames(dataframe)
  varlist <- varlist[varlist != outcome]
  
  ## Flag variables with at least 5 distinct values as continuous.
  ## (Name kept as-is: `iscontinous` is referenced by later chunks.)
  iscontinous <- sapply(apply(dataframe,2,unique),length) >= 5 ## Only variables with enough samples



## Rank-based logit scaling of all features.
dataframeScaled <- FRESAScale(dataframe,method="OrderLogit")$scaledData

1.4 The heatmap of the data

# Cap the number of plotted samples at 1000 to keep the heat maps legible.
numsub <- min(nrow(dataframe), 1000)


# Heat map of the scaled raw data (skipped for very wide datasets).
if (!largeSet)
{

  hm <- heatMaps(data=dataframeScaled[1:numsub,],
                 Outcome=outcome,
                 Scale=TRUE,
                 hCluster = "row",
                 xlab="Feature",
                 ylab="Sample",
                 srtCol=45,
                 srtRow=45,
                 cexCol=cexheat,
                 cexRow=cexheat
                 )
  par(op)   # restore graphics defaults
}

1.4.0.1 Correlation Matrix of the Data

The heat map of the data


# Pairwise Pearson correlation of the raw features (skipped for very wide data).
if (!largeSet)
{

  par(cex=0.6,cex.main=0.85,cex.axis=0.7)
  #cormat <- Rfast::cora(as.matrix(dataframe[,varlist]),large=TRUE)
  cormat <- cor(dataframe[,varlist],method="pearson")
  # Constant columns produce NA correlations; treat them as uncorrelated.
  cormat[is.na(cormat)] <- 0
  gplots::heatmap.2(abs(cormat),
                    trace = "none",
  #                  scale = "row",
                    mar = c(5,5),
                    col=rev(heat.colors(5)),
                    main = "Original Correlation",
                    cexRow = cexheat,
                    cexCol = cexheat,
                     srtCol=45,
                     srtRow=45,
                    key.title=NA,
                    key.xlab="|Pearson Correlation|",
                    xlab="Feature", ylab="Feature")
  # Report the largest off-diagonal |correlation|.
  diag(cormat) <- 0
  print(max(abs(cormat)))
}

[1] 0.999994

1.5 The decorrelation


DEdataframe <- IDeA(dataframe,verbose=TRUE,thr=thro)
#> 
#>  Included: 224 , Uni p: 0.001116071 , Outcome-Driven Size: 0 , Base Size: 1 , Rcrit: 0.384269 
#> 
#> 
 1 <R=1.000,thr=0.900,N=  224>, Top: 1( 223 )[ 1 : 1 Fa= 1 : 0.900 ]( 1 , 223 , 0 ),<|>Tot Used: 224 , Added: 223 , Zero Std: 0 , Max Cor: 1.000
#> 
 2 <R=1.000,thr=0.900,N=  224>, Top: 5( 71 )[ 1 : 5 Fa= 6 : 0.900 ]( 5 , 158 , 1 ),<|>Tot Used: 224 , Added: 158 , Zero Std: 0 , Max Cor: 1.000
#> 
 3 <R=1.000,thr=0.900,N=  224>, Top: 11( 4 )[ 1 : 11 Fa= 17 : 0.900 ]( 11 , 149 , 6 ),<|>Tot Used: 224 , Added: 149 , Zero Std: 0 , Max Cor: 0.999
#> 
 4 <R=0.999,thr=0.900,N=  224>, Top: 21( 5 )=[ 2 : 21 Fa= 35 : 0.928 ]( 21 , 151 , 17 ),<|>Tot Used: 224 , Added: 151 , Zero Std: 0 , Max Cor: 1.000
#> 
 5 <R=1.000,thr=0.900,N=  224>, Top: 36( 4 )[ 1 : 36 Fa= 71 : 0.900 ]( 36 , 134 , 35 ),<|>Tot Used: 224 , Added: 134 , Zero Std: 0 , Max Cor: 0.997
#> 
 6 <R=0.997,thr=0.900,N=  224>, Top: 39( 4 )[ 1 : 39 Fa= 109 : 0.900 ]( 39 , 83 , 71 ),<|>Tot Used: 224 , Added: 83 , Zero Std: 0 , Max Cor: 0.995
#> 
 7 <R=0.995,thr=0.900,N=  224>, Top: 25( 2 )[ 1 : 25 Fa= 134 : 0.900 ]( 25 , 32 , 109 ),<|>Tot Used: 224 , Added: 32 , Zero Std: 0 , Max Cor: 0.985
#> 
 8 <R=0.985,thr=0.900,N=  224>, Top: 4( 1 )[ 1 : 4 Fa= 138 : 0.900 ]( 4 , 4 , 134 ),<|>Tot Used: 224 , Added: 4 , Zero Std: 0 , Max Cor: 0.899
#> 
 9 <R=0.899,thr=0.800,N=   77>, Top: 34( 1 )[ 1 : 34 Fa= 143 : 0.800 ]( 34 , 41 , 138 ),<|>Tot Used: 224 , Added: 41 , Zero Std: 0 , Max Cor: 0.993
#> 
 10 <R=0.993,thr=0.900,N=   20>, Top: 10( 1 )[ 1 : 10 Fa= 143 : 0.900 ]( 10 , 10 , 143 ),<|>Tot Used: 224 , Added: 10 , Zero Std: 0 , Max Cor: 0.995
#> 
 11 <R=0.995,thr=0.900,N=   20>, Top: 4( 1 )[ 1 : 4 Fa= 145 : 0.900 ]( 4 , 4 , 143 ),<|>Tot Used: 224 , Added: 4 , Zero Std: 0 , Max Cor: 0.950
#> 
 12 <R=0.950,thr=0.900,N=   20>, Top: 1( 1 )[ 1 : 1 Fa= 146 : 0.900 ]( 1 , 1 , 145 ),<|>Tot Used: 224 , Added: 1 , Zero Std: 0 , Max Cor: 0.893
#> 
 13 <R=0.893,thr=0.800,N=   26>, Top: 12( 1 )[ 1 : 12 Fa= 149 : 0.800 ]( 12 , 14 , 146 ),<|>Tot Used: 224 , Added: 14 , Zero Std: 0 , Max Cor: 0.977
#> 
 14 <R=0.977,thr=0.900,N=    8>, Top: 4( 1 )[ 1 : 4 Fa= 151 : 0.900 ]( 4 , 4 , 149 ),<|>Tot Used: 224 , Added: 4 , Zero Std: 0 , Max Cor: 0.862
#> 
 15 <R=0.862,thr=0.800,N=   10>, Top: 5( 1 )[ 1 : 5 Fa= 151 : 0.800 ]( 5 , 5 , 151 ),<|>Tot Used: 224 , Added: 5 , Zero Std: 0 , Max Cor: 0.944
#> 
 16 <R=0.944,thr=0.900,N=    2>, Top: 1( 1 )[ 1 : 1 Fa= 151 : 0.900 ]( 1 , 1 , 151 ),<|>Tot Used: 224 , Added: 1 , Zero Std: 0 , Max Cor: 0.842
#> 
 17 <R=0.842,thr=0.800,N=    4>, Top: 2( 1 )[ 1 : 2 Fa= 151 : 0.800 ]( 2 , 2 , 151 ),<|>Tot Used: 224 , Added: 2 , Zero Std: 0 , Max Cor: 0.994
#> 
 18 <R=0.994,thr=0.900,N=    2>, Top: 1( 1 )[ 1 : 1 Fa= 152 : 0.900 ]( 1 , 1 , 151 ),<|>Tot Used: 224 , Added: 1 , Zero Std: 0 , Max Cor: 0.891
#> 
 19 <R=0.891,thr=0.800,N=    2>, Top: 1( 1 )[ 1 : 1 Fa= 152 : 0.800 ]( 1 , 1 , 152 ),<|>Tot Used: 224 , Added: 1 , Zero Std: 0 , Max Cor: 0.896
#> 
 20 <R=0.896,thr=0.800,N=    2>, Top: 1( 1 )[ 1 : 1 Fa= 152 : 0.800 ]( 1 , 1 , 152 ),<|>Tot Used: 224 , Added: 1 , Zero Std: 0 , Max Cor: 0.799
#> 
 21 <R=0.799,thr=0.800,N=    2>
#> 
 [ 21 ], 0.7994991 Decor Dimension: 224 Nused: 224 . Cor to Base: 223 , ABase: 1 , Outcome Base: 0 
#> 
# Decorrelated feature names, excluding the outcome.
varlistc <- colnames(DEdataframe)[colnames(DEdataframe) != outcome]

# Total variance of the raw features...
pander::pander(sum(apply(dataframe[,varlist],2,var)))

5.5

# ...versus total variance after decorrelation.
pander::pander(sum(apply(DEdataframe[,varlistc],2,var)))

0.0357

# Empirical entropy (256-bin discretization) of the pooled raw values...
pander::pander(entropy(discretize(unlist(dataframe[,varlist]), 256)))

5.08

# ...versus entropy after decorrelation.
pander::pander(entropy(discretize(unlist(DEdataframe[,varlistc]), 256)))

0.969

1.5.1 The decorrelation matrix


# Sparsity pattern of the decorrelation (UPLTM) matrix produced by IDeA:
# a cell is 1 where the transformation has a nonzero coefficient.
if (!largeSet)
{

  par(cex=0.6,cex.main=0.85,cex.axis=0.7)
  
  UPLTM <- attr(DEdataframe,"UPLTM")
  
  gplots::heatmap.2(1.0*(abs(UPLTM)>0),
                    trace = "none",
                    mar = c(5,5),
                    col=rev(heat.colors(5)),
                    main = "Decorrelation matrix",
                    cexRow = cexheat,
                    cexCol = cexheat,
                   srtCol=45,
                   srtRow=45,
                    key.title=NA,
                    key.xlab="|Beta|>0",
                    xlab="Output Feature", ylab="Input Feature")
  
  par(op)   # restore graphics defaults
}

1.6 The heatmap of the decorrelated data

# Heat map of the decorrelated data (skipped for very wide datasets).
if (!largeSet)
{

  hm <- heatMaps(data=DEdataframe[1:numsub,],
                 Outcome=outcome,
                 Scale=TRUE,
                 hCluster = "row",
                 cexRow = cexheat,
                 cexCol = cexheat,
                 srtCol=45,
                 srtRow=45,
                 xlab="Feature",
                 ylab="Sample")
  par(op)   # restore graphics defaults
}

1.7 The correlation matrix after decorrelation

# Pairwise correlation AFTER decorrelation; the maximum off-diagonal value
# should now be at or below the IDeA threshold `thro`.
if (!largeSet)
{

  cormat <- cor(DEdataframe[,varlistc],method="pearson")
  cormat[is.na(cormat)] <- 0
  
  gplots::heatmap.2(abs(cormat),
                    trace = "none",
                    mar = c(5,5),
                    col=rev(heat.colors(5)),
                    main = "Correlation after IDeA",
                    cexRow = cexheat,
                    cexCol = cexheat,
                     srtCol=45,
                     srtRow=45,
                    key.title=NA,
                    key.xlab="|Pearson Correlation|",
                    xlab="Feature", ylab="Feature")
  
  par(op)
  # Report the largest remaining off-diagonal |correlation|.
  diag(cormat) <- 0
  print(max(abs(cormat)))
}

[1] 0.7994991

1.8 U-MAP Visualization of features

1.8.1 The UMAP of the Raw Data


if (nrow(dataframe) < 1000)
{
  ## One color per outcome class for the text labels.
  classes <- unique(dataframe[1:numsub,outcome])
  raincolors <- rainbow(length(classes))
  names(raincolors) <- classes
  ## 2-D UMAP embedding of the scaled raw features (`<-`, not `=`).
  datasetframe.umap <- umap(scale(dataframe[1:numsub,varlist]),n_components=2)
  plot(datasetframe.umap$layout,xlab="U1",ylab="U2",main="UMAP: Original",t='n')
  text(datasetframe.umap$layout,labels=dataframe[1:numsub,outcome],col=raincolors[dataframe[1:numsub,outcome]+1])
}

1.8.2 The decorrelated UMAP

if (nrow(dataframe) < 1000)
{

  ## 2-D UMAP embedding of the decorrelated features (`<-`, not `=`).
  ## Reuses `raincolors` from the raw-UMAP chunk above.
  datasetframe.umap <- umap(scale(DEdataframe[1:numsub,varlistc]),n_components=2)
  plot(datasetframe.umap$layout,xlab="U1",ylab="U2",main="UMAP: After IDeA",t='n')
  text(datasetframe.umap$layout,labels=DEdataframe[1:numsub,outcome],col=raincolors[DEdataframe[1:numsub,outcome]+1])
}

1.9 Univariate Analysis

1.9.1 Univariate



# Univariate AUC ranking of the raw features against the outcome.
univarRAW <- uniRankVar(varlist,
               paste(outcome,"~1"),
               outcome,
               dataframe,
               rankingTest="AUC")

100 : V_1064 200 : V_854




## Univariate AUC ranking of the decorrelated features.
## (Removed the trailing comma, which creates an empty argument in the call.)
univarDe <- uniRankVar(varlistc,
               paste(outcome,"~1"),
               outcome,
               DEdataframe,
               rankingTest="AUC")

100 : La_V_1064 200 : La_V_854

1.9.2 Final Table


## Columns reported in the univariate summary tables.
univariate_columns <- c("caseMean","caseStd","controlMean","controlStd","controlKSP","ROCAUC")

## Logical mask selecting the top-ranked variables (seq_along is safe for
## empty varlist, unlike 1:length(varlist)).
topvar <- seq_along(varlist) <= TopVariables
tableRaw <- univarRAW$orderframe[topvar,univariate_columns]
pander::pander(tableRaw)
  caseMean caseStd controlMean controlStd controlKSP ROCAUC
V_908 0.221 0.128 0.261 0.117 0.579 0.596
V_906 0.220 0.127 0.261 0.117 0.585 0.596
V_904 0.220 0.127 0.261 0.117 0.592 0.596
V_892 0.219 0.127 0.261 0.121 0.626 0.596
V_890 0.219 0.127 0.261 0.121 0.616 0.596
V_888 0.219 0.127 0.261 0.122 0.603 0.596
V_912 0.223 0.129 0.263 0.117 0.604 0.595
V_910 0.222 0.128 0.262 0.117 0.587 0.595
V_896 0.220 0.127 0.261 0.120 0.620 0.595
V_894 0.219 0.127 0.261 0.121 0.625 0.595


# Names of the decorrelated (latent, "La_"-prefixed) features, ranked by AUC.
topLAvar <- univarDe$orderframe$Name[str_detect(univarDe$orderframe$Name,"La_")]
# Keep the overall top features plus the best TopVariables/2 latent features.
topLAvar <- unique(c(univarDe$orderframe$Name[topvar],topLAvar[1:as.integer(TopVariables/2)]))
finalTable <- univarDe$orderframe[topLAvar,univariate_columns]


pander::pander(finalTable)
  caseMean caseStd controlMean controlStd controlKSP ROCAUC
La_V_1054 -3.44e-03 1.80e-02 -2.04e-02 1.20e-02 0.9818 0.795
La_V_934 7.79e-05 1.13e-04 1.94e-04 2.51e-04 0.2394 0.771
La_V_1004 2.04e-04 2.47e-03 2.02e-03 1.55e-03 0.2181 0.766
La_V_916 1.52e-03 3.29e-03 3.71e-03 4.33e-03 0.0363 0.763
La_V_848 7.39e-04 2.64e-03 -5.58e-04 1.13e-03 0.3681 0.760
La_V_1266 -2.22e-03 3.07e-03 -5.09e-03 3.63e-03 0.1002 0.754
La_V_1128 3.06e-04 9.96e-04 -3.28e-04 6.81e-04 0.0627 0.743
La_V_1018 2.65e-05 9.57e-05 6.95e-05 5.35e-05 0.6595 0.743
La_V_810 1.42e-05 3.63e-05 3.79e-05 2.28e-05 0.7256 0.735
La_V_1102 -9.87e-04 7.12e-03 5.03e-03 5.69e-03 0.4870 0.734

# Latent-variable coefficients and feature-discovery scores from IDeA.
dc <- getLatentCoefficients(DEdataframe)
fscores <- attr(DEdataframe,"fscore")


# Mean formula length, number of latent variables, and their fraction of all features.
pander::pander(c(mean=mean(sapply(dc,length)),total=length(dc),fraction=length(dc)/(ncol(dataframe)-1)))
mean total fraction
5.65 223 0.996

# Human-readable decorrelation formula for each latent feature.
theCharformulas <- attr(dc,"LatentCharFormulas")


finalTable <- rbind(finalTable,tableRaw[topvar[!(topvar %in% topLAvar)],univariate_columns])


# Map latent names back to raw names so the raw-data AUC can be compared.
orgnamez <- rownames(finalTable)
orgnamez <- str_remove_all(orgnamez,"La_")
finalTable$RAWAUC <- univarRAW$orderframe[orgnamez,"ROCAUC"]
# Attach the decorrelation formula and f-score of each feature (NA for raw ones).
finalTable$DecorFormula <- theCharformulas[rownames(finalTable)]
finalTable$fscores <- fscores[rownames(finalTable)]

Final_Columns <- c("DecorFormula","caseMean","caseStd","controlMean","controlStd","controlKSP","ROCAUC","RAWAUC","fscores")

# Sort by decorrelated AUC (descending) for the final report table.
finalTable <- finalTable[order(-finalTable$ROCAUC),]
pander::pander(finalTable[,Final_Columns])
  DecorFormula caseMean caseStd controlMean controlStd controlKSP ROCAUC RAWAUC fscores
La_V_1054 - (1.017)V_1138 + V_1054 - (3.036)V_986 + (3.026)V_974 -3.44e-03 1.80e-02 -2.04e-02 1.20e-02 0.9818 0.795 0.547 21
La_V_934 - (5.73e-04)V_1138 + V_934 - (1.847)V_932 + (0.847)V_930 + (1.19e-03)V_872 7.79e-05 1.13e-04 1.94e-04 2.51e-04 0.2394 0.771 0.584 -1
La_V_1004 + (0.016)V_1138 - (1.011)V_1006 + V_1004 2.04e-04 2.47e-03 2.02e-03 1.55e-03 0.2181 0.766 0.571 2
La_V_916 + (0.041)V_1138 - (0.956)V_930 + V_916 - (0.074)V_872 1.52e-03 3.29e-03 3.71e-03 4.33e-03 0.0363 0.763 0.594 10
La_V_848 - (5.65e-03)V_1138 + (0.638)V_872 - (1.615)V_860 + V_848 7.39e-04 2.64e-03 -5.58e-04 1.13e-03 0.3681 0.760 0.589 -3
La_V_1266 - (0.933)V_1282 + V_1266 - (0.077)V_1222 - (7.72e-03)V_1138 -2.22e-03 3.07e-03 -5.09e-03 3.63e-03 0.1002 0.754 0.544 2
La_V_1128 + (0.601)V_1138 - (1.601)V_1132 + V_1128 3.06e-04 9.96e-04 -3.28e-04 6.81e-04 0.0627 0.743 0.560 3
La_V_1018 + (1.95e-04)V_1138 - (0.964)V_1024 + (2.886)V_1022 - (2.920)V_1020 + V_1018 - (2.29e-03)V_872 2.65e-05 9.57e-05 6.95e-05 5.35e-05 0.6595 0.743 0.557 -5
La_V_810 + (1.20e-04)V_1138 + V_810 - (2.673)V_808 + (2.006)V_806 - (0.333)V_802 1.42e-05 3.63e-05 3.79e-05 2.28e-05 0.7256 0.735 0.569 -4
La_V_1102 - (0.278)V_1138 + V_1102 - (0.715)V_1086 -9.87e-04 7.12e-03 5.03e-03 5.69e-03 0.4870 0.734 0.560 9
V_908 NA 2.21e-01 1.28e-01 2.61e-01 1.17e-01 0.5785 0.596 0.596 NA
V_906 NA 2.20e-01 1.27e-01 2.61e-01 1.17e-01 0.5848 0.596 0.596 NA
V_904 NA 2.20e-01 1.27e-01 2.61e-01 1.17e-01 0.5918 0.596 0.596 NA
V_892 NA 2.19e-01 1.27e-01 2.61e-01 1.21e-01 0.6256 0.596 0.596 NA
V_890 NA 2.19e-01 1.27e-01 2.61e-01 1.21e-01 0.6163 0.596 0.596 NA
V_888 NA 2.19e-01 1.27e-01 2.61e-01 1.22e-01 0.6032 0.596 0.596 NA
V_912 NA 2.23e-01 1.29e-01 2.63e-01 1.17e-01 0.6041 0.595 0.595 NA
V_910 NA 2.22e-01 1.28e-01 2.62e-01 1.17e-01 0.5866 0.595 0.595 NA
V_896 NA 2.20e-01 1.27e-01 2.61e-01 1.20e-01 0.6202 0.595 0.595 NA
V_894 NA 2.19e-01 1.27e-01 2.61e-01 1.21e-01 0.6248 0.595 0.595 NA

1.10 Comparing IDeA vs PCA vs EFA

1.10.1 PCA

# Feature names excluding the outcome (kept for reference).
featuresnames <- colnames(dataframe)[colnames(dataframe) != outcome]
# PCA on the continuous features only, centered and scaled.
pc <- prcomp(dataframe[,iscontinous],center = TRUE,scale. = TRUE)   #principal components
predPCA <- predict(pc,dataframe[,iscontinous])
# Recombine PC scores with the non-continuous columns (these include the outcome).
PCAdataframe <- as.data.frame(cbind(predPCA,dataframe[,!iscontinous]))
colnames(PCAdataframe) <- c(colnames(predPCA),colnames(dataframe)[!iscontinous]) 
#plot(PCAdataframe[,colnames(PCAdataframe)!=outcome],col=dataframe[,outcome],cex=0.65,cex.lab=0.5,cex.axis=0.75,cex.sub=0.5,cex.main=0.75)

#pander::pander(pc$rotation)


# Correlation among the PCA-transformed features (should be near-diagonal).
PCACor <- cor(PCAdataframe[,colnames(PCAdataframe) != outcome])


  gplots::heatmap.2(abs(PCACor),
                    trace = "none",
  #                  scale = "row",
                    mar = c(5,5),
                    col=rev(heat.colors(5)),
                    main = "PCA Correlation",
                    cexRow = 0.5,
                    cexCol = 0.5,
                     srtCol=45,
                     srtRow= -45,
                    key.title=NA,
                    key.xlab="Pearson Correlation",
                    xlab="Feature", ylab="Feature")

1.10.2 EFA


# Default to the scaled data in case EFA is skipped below.
EFAdataframe <- dataframeScaled

# NOTE(review): `length(iscontinous)` counts ALL columns, not just the
# continuous ones (`sum(iscontinous)` would); confirm which was intended.
if (length(iscontinous) < 2000)
{
  # Number of factors: bounded by columns, rows, and half the PCA components.
  topred <- min(length(iscontinous),nrow(dataframeScaled),ncol(predPCA)/2)
  if (topred < 2) topred <- 2
  
  uls <- fa(dataframeScaled[,iscontinous],nfactors=topred,rotate="varimax",warnings=FALSE)  # EFA analysis
  predEFA <- predict(uls,dataframeScaled[,iscontinous])
  # Recombine factor scores with the non-continuous columns (includes the outcome).
  EFAdataframe <- as.data.frame(cbind(predEFA,dataframeScaled[,!iscontinous]))
  colnames(EFAdataframe) <- c(colnames(predEFA),colnames(dataframeScaled)[!iscontinous]) 


  
  # Correlation among the EFA factor scores.
  EFACor <- cor(EFAdataframe[,colnames(EFAdataframe) != outcome])
  
  
    gplots::heatmap.2(abs(EFACor),
                      trace = "none",
    #                  scale = "row",
                      mar = c(5,5),
                      col=rev(heat.colors(5)),
                      main = "EFA Correlation",
                      cexRow = 0.5,
                      cexCol = 0.5,
                       srtCol=45,
                       srtRow= -45,
                      key.title=NA,
                      key.xlab="Pearson Correlation",
                      xlab="Feature", ylab="Feature")
}

1.11 Effect on CAR modeling

# Restore plotting defaults and allow drawing outside the plot region.
par(op)
par(xpd = TRUE)
# Depth-3 CART (rpart) on the RAW features; outcome becomes a factor for
# classification.
dataframe[,outcome] <- factor(dataframe[,outcome])
rawmodel <- rpart(paste(outcome,"~."),dataframe,control=rpart.control(maxdepth=3))
pr <- predict(rawmodel,dataframe,type = "class")

  # Placeholder in case the tree degenerates to a single predicted class.
  ptab <- list(er="Error",detail=matrix(nrow=6,ncol=1))
  if (length(unique(pr))>1)
  {
    plot(rawmodel,main="Raw",branch=0.5,uniform = TRUE,compress = TRUE,margin=0.1)
    text(rawmodel, use.n = TRUE,cex=0.75)
    # Diagnostic-test statistics from the confusion table.
    ptab <- epiR::epi.tests(table(pr==0,dataframe[,outcome]==0))
  }


# Confusion matrix of the raw-data tree.
pander::pander(table(dataframe[,outcome],pr))
  0 1
0 30 0
1 17 14
# Accuracy, sensitivity, specificity, and diagnostic odds ratio.
pander::pander(ptab$detail[c(5,3,4,6),])
  statistic est lower upper
5 diag.ac 0.721 0.592 0.829
3 se 0.452 0.273 0.640
4 sp 1.000 0.884 1.000
6 diag.or Inf NA Inf

# Restore plotting defaults and allow drawing outside the plot region.
par(op)
par(xpd = TRUE)
# Depth-3 CART on the IDeA-decorrelated features.
DEdataframe[,outcome] <- factor(DEdataframe[,outcome])
IDeAmodel <- rpart(paste(outcome,"~."),DEdataframe,control=rpart.control(maxdepth=3))
pr <- predict(IDeAmodel,DEdataframe,type = "class")

  # Placeholder in case the tree degenerates to a single predicted class.
  ptab <- list(er="Error",detail=matrix(nrow=6,ncol=1))
  if (length(unique(pr))>1)
  {
    plot(IDeAmodel,main="IDeA",branch=0.5,uniform = TRUE,compress = TRUE,margin=0.1)
    text(IDeAmodel, use.n = TRUE,cex=0.75)
    ptab <- epiR::epi.tests(table(pr==0,DEdataframe[,outcome]==0))
  }

# Confusion matrix of the decorrelated-data tree.
pander::pander(table(DEdataframe[,outcome],pr))
  0 1
0 25 5
1 2 29
# Accuracy, sensitivity, specificity, and diagnostic odds ratio.
pander::pander(ptab$detail[c(5,3,4,6),])
  statistic est lower upper
5 diag.ac 0.885 0.778 0.953
3 se 0.935 0.786 0.992
4 sp 0.833 0.653 0.944
6 diag.or 72.500 12.919 406.851

# Restore plotting defaults and allow drawing outside the plot region.
par(op)
par(xpd = TRUE)
# Depth-3 CART on the PCA-transformed features.
PCAdataframe[,outcome] <- factor(PCAdataframe[,outcome])
PCAmodel <- rpart(paste(outcome,"~."),PCAdataframe,control=rpart.control(maxdepth=3))
pr <- predict(PCAmodel,PCAdataframe,type = "class")
# Placeholder in case the tree degenerates to a single predicted class.
ptab <- list(er="Error",detail=matrix(nrow=6,ncol=1))
if (length(unique(pr))>1)
{
  plot(PCAmodel,main="PCA",branch=0.5,uniform = TRUE,compress = TRUE,margin=0.1)
  text(PCAmodel, use.n = TRUE,cex=0.75)
  ptab <- epiR::epi.tests(table(pr==0,PCAdataframe[,outcome]==0))
}

# Confusion matrix of the PCA-data tree.
pander::pander(table(PCAdataframe[,outcome],pr))
  0 1
0 23 7
1 3 28
# Accuracy, sensitivity, specificity, and diagnostic odds ratio.
pander::pander(ptab$detail[c(5,3,4,6),])
  statistic est lower upper
5 diag.ac 0.836 0.719 0.918
3 se 0.903 0.742 0.980
4 sp 0.767 0.577 0.901
6 diag.or 30.667 7.117 132.134


par(op)

1.11.1 EFA


  # Depth-3 CART on the EFA factor scores.
  EFAdataframe[,outcome] <- factor(EFAdataframe[,outcome])
  EFAmodel <- rpart(paste(outcome,"~."),EFAdataframe,control=rpart.control(maxdepth=3))
  pr <- predict(EFAmodel,EFAdataframe,type = "class")
  
  # Placeholder in case the tree degenerates to a single predicted class.
  ptab <- list(er="Error",detail=matrix(nrow=6,ncol=1))
  if (length(unique(pr))>1)
  {
    plot(EFAmodel,main="EFA",branch=0.5,uniform = TRUE,compress = TRUE,margin=0.1)
    text(EFAmodel, use.n = TRUE,cex=0.75)
    ptab <- epiR::epi.tests(table(pr==0,EFAdataframe[,outcome]==0))
  }


  # Confusion matrix of the EFA-data tree.
  pander::pander(table(EFAdataframe[,outcome],pr))
  0 1
0 29 1
1 16 15
  # Accuracy, sensitivity, specificity, and diagnostic odds ratio.
  pander::pander(ptab$detail[c(5,3,4,6),])
  statistic est lower upper
5 diag.ac 0.721 0.592 0.829
3 se 0.484 0.302 0.669
4 sp 0.967 0.828 0.999
6 diag.or 27.188 3.282 225.207
  par(op)   # restore graphics defaults